[Flang][OpenMP][Taskloop] Translation support for taskloop construct #166903
Conversation
@llvm/pr-subscribers-flang-fir-hlfir @llvm/pr-subscribers-mlir-llvm
Author: Kaviya Rajendiran (kaviya2510)
Changes: Added translation support for the taskloop construct.
Patch is 32.13 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/166903.diff
6 Files Affected:
diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
index b3d7ab4acf303..18828380abd32 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
+++ b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
@@ -1359,6 +1359,22 @@ class OpenMPIRBuilder {
: DepKind(DepKind), DepValueType(DepValueType), DepVal(DepVal) {}
};
+ /// Generator for `#omp taskloop`
+ ///
+ /// \param Loc The location where the taskloop construct was encountered.
+ /// \param AllocaIP The insertion point to be used for alloca instructions.
+ /// \param BodyGenCB Callback that will generate the region code.
+  /// \param LoopInfo Callback that returns the CanonicalLoopInfo (CLI)
+  /// \param LBVal Lower bound value of the loop
+  /// \param UBVal Upper bound value of the loop
+  /// \param StepVal Step value of the loop
+ /// \param Tied True if the task is tied, false if the task is untied.
+ LLVM_ABI InsertPointOrErrorTy createTaskloop(
+ const LocationDescription &Loc, InsertPointTy AllocaIP,
+ BodyGenCallbackTy BodyGenCB,
+ llvm::function_ref<llvm::Expected<llvm::CanonicalLoopInfo *>()> LoopInfo,
+ Value *LBVal, Value *UBVal, Value *StepVal, bool Tied = true);
+
/// Generator for `#omp task`
///
/// \param Loc The location where the task construct was encountered.
diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def b/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def
index 46b3d53a4b408..032495dfe9d61 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def
+++ b/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def
@@ -95,6 +95,7 @@ __OMP_STRUCT_TYPE(KernelArgs, __tgt_kernel_arguments, false, Int32, Int32, VoidP
__OMP_STRUCT_TYPE(AsyncInfo, __tgt_async_info, false, Int8Ptr)
__OMP_STRUCT_TYPE(DependInfo, kmp_dep_info, false, SizeTy, SizeTy, Int8)
__OMP_STRUCT_TYPE(Task, kmp_task_ompbuilder_t, false, VoidPtr, VoidPtr, Int32, VoidPtr, VoidPtr)
+__OMP_STRUCT_TYPE(Taskloop, kmp_task_info, false, VoidPtr, VoidPtr, Int32, VoidPtr, VoidPtr, Int64, Int64, Int64)
__OMP_STRUCT_TYPE(ConfigurationEnvironment, ConfigurationEnvironmentTy, false,
Int8, Int8, Int8, Int32, Int32, Int32, Int32, Int32, Int32)
__OMP_STRUCT_TYPE(DynamicEnvironment, DynamicEnvironmentTy, false, Int16)
diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index fff9a815e5368..e88e722b1370e 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -1933,6 +1933,205 @@ static Value *emitTaskDependencies(
return DepArray;
}
+OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createTaskloop(
+ const LocationDescription &Loc, InsertPointTy AllocaIP,
+ BodyGenCallbackTy BodyGenCB,
+ llvm::function_ref<llvm::Expected<llvm::CanonicalLoopInfo *>()> loopInfo,
+ Value *LBVal, Value *UBVal, Value *StepVal, bool Tied) {
+
+ if (!updateToLocation(Loc))
+ return InsertPointTy();
+
+ uint32_t SrcLocStrSize;
+ Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
+ Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
+
+ BasicBlock *TaskloopExitBB =
+ splitBB(Builder, /*CreateBranch=*/true, "taskloop.exit");
+ BasicBlock *TaskloopBodyBB =
+ splitBB(Builder, /*CreateBranch=*/true, "taskloop.body");
+ BasicBlock *TaskloopAllocaBB =
+ splitBB(Builder, /*CreateBranch=*/true, "taskloop.alloca");
+
+ InsertPointTy TaskloopAllocaIP =
+ InsertPointTy(TaskloopAllocaBB, TaskloopAllocaBB->begin());
+ InsertPointTy TaskloopBodyIP =
+ InsertPointTy(TaskloopBodyBB, TaskloopBodyBB->begin());
+
+ if (Error Err = BodyGenCB(TaskloopAllocaIP, TaskloopBodyIP))
+ return Err;
+
+ llvm::Expected<llvm::CanonicalLoopInfo *> result = loopInfo();
+ if (!result) {
+ return result.takeError();
+ }
+
+ llvm::CanonicalLoopInfo *CLI = result.get();
+ OutlineInfo OI;
+ OI.EntryBB = TaskloopAllocaBB;
+ OI.OuterAllocaBB = AllocaIP.getBlock();
+ OI.ExitBB = TaskloopExitBB;
+
+ // Add the thread ID argument.
+ SmallVector<Instruction *, 4> ToBeDeleted;
+ // dummy instruction to be used as a fake argument
+ OI.ExcludeArgsFromAggregate.push_back(createFakeIntVal(
+ Builder, AllocaIP, ToBeDeleted, TaskloopAllocaIP, "global.tid", false));
+
+ OI.PostOutlineCB = [this, Ident, LBVal, UBVal, StepVal, Tied,
+ TaskloopAllocaBB, CLI, Loc,
+ ToBeDeleted](Function &OutlinedFn) mutable {
+ // Replace the Stale CI by appropriate RTL function call.
+ assert(OutlinedFn.hasOneUse() &&
+ "there must be a single user for the outlined function");
+ CallInst *StaleCI = cast<CallInst>(OutlinedFn.user_back());
+
+ // HasShareds is true if any variables are captured in the outlined region,
+ // false otherwise.
+ bool HasShareds = StaleCI->arg_size() > 1;
+ Builder.SetInsertPoint(StaleCI);
+
+ // Gather the arguments for emitting the runtime call for
+ // @__kmpc_omp_task_alloc
+ Function *TaskAllocFn =
+ getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_task_alloc);
+
+ Value *ThreadID = getOrCreateThreadID(Ident);
+
+ // Emit runtime call for @__kmpc_taskgroup
+ Function *TaskgroupFn =
+ getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_taskgroup);
+ Builder.CreateCall(TaskgroupFn, {Ident, ThreadID});
+
+ // The flags are set to 1 if the task is tied, 0 otherwise.
+ Value *Flags = Builder.getInt32(Tied);
+
+ Value *TaskSize = Builder.getInt64(
+ divideCeil(M.getDataLayout().getTypeSizeInBits(Taskloop), 8));
+
+ Value *SharedsSize = Builder.getInt64(0);
+ if (HasShareds) {
+ AllocaInst *ArgStructAlloca =
+ dyn_cast<AllocaInst>(StaleCI->getArgOperand(1));
+ assert(ArgStructAlloca &&
+ "Unable to find the alloca instruction corresponding to arguments "
+ "for extracted function");
+ StructType *ArgStructType =
+ dyn_cast<StructType>(ArgStructAlloca->getAllocatedType());
+ assert(ArgStructType && "Unable to find struct type corresponding to "
+ "arguments for extracted function");
+ SharedsSize =
+ Builder.getInt64(M.getDataLayout().getTypeStoreSize(ArgStructType));
+ }
+
+ // Emit the @__kmpc_omp_task_alloc runtime call
+ // The runtime call returns a pointer to an area where the task captured
+ // variables must be copied before the task is run (TaskData)
+ CallInst *TaskData = Builder.CreateCall(
+ TaskAllocFn, {/*loc_ref=*/Ident, /*gtid=*/ThreadID, /*flags=*/Flags,
+ /*sizeof_task=*/TaskSize, /*sizeof_shared=*/SharedsSize,
+ /*task_func=*/&OutlinedFn});
+
+ // Get the pointer to loop lb, ub, step from task ptr
+ // and set up the lowerbound,upperbound and step values
+ llvm::Value *lb =
+ Builder.CreateStructGEP(OpenMPIRBuilder::Taskloop, TaskData, 5);
+ // Value *LbVal_ext = Builder.CreateSExt(LBVal, Builder.getInt64Ty());
+ Builder.CreateStore(LBVal, lb);
+
+ llvm::Value *ub =
+ Builder.CreateStructGEP(OpenMPIRBuilder::Taskloop, TaskData, 6);
+ Builder.CreateStore(UBVal, ub);
+
+ llvm::Value *step =
+ Builder.CreateStructGEP(OpenMPIRBuilder::Taskloop, TaskData, 7);
+ Value *Step_ext = Builder.CreateSExt(StepVal, Builder.getInt64Ty());
+ Builder.CreateStore(Step_ext, step);
+ llvm::Value *loadstep = Builder.CreateLoad(Builder.getInt64Ty(), step);
+
+ if (HasShareds) {
+ Value *Shareds = StaleCI->getArgOperand(1);
+ Align Alignment = TaskData->getPointerAlignment(M.getDataLayout());
+ Value *TaskShareds = Builder.CreateLoad(VoidPtr, TaskData);
+ Builder.CreateMemCpy(TaskShareds, Alignment, Shareds, Alignment,
+ SharedsSize);
+ }
+
+ // set up the arguments for emitting kmpc_taskloop runtime call
+ // setting default values for ifval, nogroup, sched, grainsize, task_dup
+ Value *IfVal = Builder.getInt32(1);
+ Value *NoGroup = Builder.getInt32(1);
+ Value *Sched = Builder.getInt32(0);
+ Value *GrainSize = Builder.getInt64(0);
+ Value *TaskDup = Constant::getNullValue(Builder.getPtrTy());
+
+ Value *Args[] = {Ident, ThreadID, TaskData, IfVal, lb, ub,
+ loadstep, NoGroup, Sched, GrainSize, TaskDup};
+
+ // taskloop runtime call
+ Function *TaskloopFn =
+ getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_taskloop);
+ Builder.CreateCall(TaskloopFn, Args);
+
+ // Emit the @__kmpc_end_taskgroup runtime call to end the taskgroup
+ Function *EndTaskgroupFn =
+ getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_taskgroup);
+ Builder.CreateCall(EndTaskgroupFn, {Ident, ThreadID});
+
+ StaleCI->eraseFromParent();
+
+ Builder.SetInsertPoint(TaskloopAllocaBB, TaskloopAllocaBB->begin());
+
+ if (HasShareds) {
+ LoadInst *Shareds = Builder.CreateLoad(VoidPtr, OutlinedFn.getArg(1));
+ OutlinedFn.getArg(1)->replaceUsesWithIf(
+ Shareds, [Shareds](Use &U) { return U.getUser() != Shareds; });
+ }
+
+ Value *IV = CLI->getIndVar();
+ Type *IVTy = IV->getType();
+ Constant *One = ConstantInt::get(IVTy, 1);
+
+ Value *task_lb = Builder.CreateStructGEP(OpenMPIRBuilder::Taskloop,
+ OutlinedFn.getArg(1), 5, "gep_lb");
+ Value *LowerBound = Builder.CreateLoad(IVTy, task_lb, "lb");
+
+ Value *task_ub = Builder.CreateStructGEP(OpenMPIRBuilder::Taskloop,
+ OutlinedFn.getArg(1), 6, "gep_ub");
+ Value *UpperBound = Builder.CreateLoad(IVTy, task_ub, "ub");
+
+ Builder.SetInsertPoint(CLI->getPreheader()->getTerminator());
+
+ Value *TripCountMinusOne = Builder.CreateSub(UpperBound, LowerBound);
+ Value *TripCount = Builder.CreateAdd(TripCountMinusOne, One, "trip_cnt");
+ // set the trip count in the CLI
+ CLI->setTripCount(TripCount);
+
+ Builder.SetInsertPoint(CLI->getBody(),
+ CLI->getBody()->getFirstInsertionPt());
+
+ llvm::BasicBlock *Body = CLI->getBody();
+ for (llvm::Instruction &I : *Body) {
+ if (auto *Add = llvm::dyn_cast<llvm::BinaryOperator>(&I)) {
+ if (Add->getOpcode() == llvm::Instruction::Add) {
+ if (llvm::isa<llvm::BinaryOperator>(Add->getOperand(0))) {
+ // update the starting index of the loop
+ Add->setOperand(1, LowerBound);
+ }
+ }
+ }
+ }
+
+ for (Instruction *I : llvm::reverse(ToBeDeleted)) {
+ I->eraseFromParent();
+ }
+ };
+
+ addOutlineInfo(std::move(OI));
+ Builder.SetInsertPoint(TaskloopExitBB, TaskloopExitBB->begin());
+ return Builder.saveIP();
+}
+
OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createTask(
const LocationDescription &Loc, InsertPointTy AllocaIP,
BodyGenCallbackTy BodyGenCB, bool Tied, Value *Final, Value *IfCondition,
diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
index 8edec990eaaba..d69fcd3db0413 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
@@ -323,6 +323,18 @@ static LogicalResult checkImplementationStatus(Operation &op) {
if (op.getDistScheduleChunkSize())
result = todo("dist_schedule with chunk_size");
};
+ auto checkFinal = [&todo](auto op, LogicalResult &result) {
+ if (op.getFinal())
+ result = todo("final");
+ };
+ auto checkGrainsize = [&todo](auto op, LogicalResult &result) {
+ if (op.getGrainsize())
+ result = todo("grainsize");
+ };
+ auto checkIf = [](auto op, LogicalResult &) {
+ if (op.getIfExpr())
+ op.emitWarning("if");
+ };
auto checkHint = [](auto op, LogicalResult &) {
if (op.getHint())
op.emitWarning("hint clause discarded");
@@ -340,10 +352,22 @@ static LogicalResult checkImplementationStatus(Operation &op) {
if (!op.getLinearVars().empty() || !op.getLinearStepVars().empty())
result = todo("linear");
};
+ auto checkMergeable = [&todo](auto op, LogicalResult &result) {
+ if (op.getMergeable())
+ result = todo("mergeable");
+ };
+ auto checkNogroup = [&todo](auto op, LogicalResult &result) {
+ if (op.getNogroup())
+ result = todo("nogroup");
+ };
auto checkNowait = [&todo](auto op, LogicalResult &result) {
if (op.getNowait())
result = todo("nowait");
};
+ auto checkNumTasks = [&todo](auto op, LogicalResult &result) {
+ if (op.getNumTasks())
+ result = todo("num_tasks");
+ };
auto checkOrder = [&todo](auto op, LogicalResult &result) {
if (op.getOrder() || op.getOrderMod())
result = todo("order");
@@ -417,7 +441,15 @@ static LogicalResult checkImplementationStatus(Operation &op) {
checkNowait(op, result);
})
.Case([&](omp::TaskloopOp op) {
- // TODO: Add other clauses check
+ checkAllocate(op, result);
+ checkFinal(op, result);
+ checkGrainsize(op, result);
+ checkIf(op, result);
+ checkInReduction(op, result);
+ checkMergeable(op, result);
+ checkNogroup(op, result);
+ checkNumTasks(op, result);
+ checkReduction(op, result);
checkUntied(op, result);
checkPriority(op, result);
})
@@ -2097,6 +2129,8 @@ class TaskContextStructManager {
/// private decls.
void createGEPsToPrivateVars();
+ llvm::Value *isAllocated();
+
/// De-allocate the task context structure.
void freeStructPtr();
@@ -2177,13 +2211,26 @@ void TaskContextStructManager::createGEPsToPrivateVars() {
}
}
+llvm::Value *TaskContextStructManager::isAllocated() {
+ if (!structPtr)
+ return nullptr;
+
+ return builder.CreateIsNotNull(structPtr);
+}
+
void TaskContextStructManager::freeStructPtr() {
if (!structPtr)
return;
llvm::IRBuilderBase::InsertPointGuard guard{builder};
- // Ensure we don't put the call to free() after the terminator
- builder.SetInsertPoint(builder.GetInsertBlock()->getTerminator());
+ llvm::BasicBlock *currentBlock = builder.GetInsertBlock();
+ if (currentBlock->getTerminator()) {
+ // Ensure we don't put the call to free() after the terminator
+ builder.SetInsertPoint(currentBlock->getTerminator());
+ } else {
+ // Insert the call to free() at the end of the current block
+ builder.SetInsertPoint(currentBlock);
+ }
builder.CreateFree(structPtr);
}
@@ -2419,6 +2466,207 @@ convertOmpTaskOp(omp::TaskOp taskOp, llvm::IRBuilderBase &builder,
return success();
}
+// Converts an OpenMP taskloop construct into LLVM IR using OpenMPIRBuilder.
+static LogicalResult
+convertOmpTaskloopOp(Operation &opInst, llvm::IRBuilderBase &builder,
+ LLVM::ModuleTranslation &moduleTranslation) {
+ using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
+ auto taskloopOp = cast<omp::TaskloopOp>(opInst);
+ if (failed(checkImplementationStatus(opInst)))
+ return failure();
+
+ // It stores the pointer of allocated firstprivate copies,
+ // which can be used later for freeing the allocated space.
+ SmallVector<llvm::Value *> llvmFirstPrivateVars;
+ PrivateVarsInfo privateVarsInfo(taskloopOp);
+ TaskContextStructManager taskStructMgr{builder, moduleTranslation,
+ privateVarsInfo.privatizers};
+
+ llvm::OpenMPIRBuilder::InsertPointTy allocaIP =
+ findAllocaInsertPoint(builder, moduleTranslation);
+
+ assert(builder.GetInsertPoint() == builder.GetInsertBlock()->end());
+ llvm::BasicBlock *taskloopStartBlock = llvm::BasicBlock::Create(
+ builder.getContext(), "omp.taskloop.start",
+ /*Parent=*/builder.GetInsertBlock()->getParent());
+ llvm::Instruction *branchToTaskloopStartBlock =
+ builder.CreateBr(taskloopStartBlock);
+ builder.SetInsertPoint(branchToTaskloopStartBlock);
+
+ llvm::BasicBlock *copyBlock =
+ splitBB(builder, /*CreateBranch=*/true, "omp.private.copy");
+ llvm::BasicBlock *initBlock =
+ splitBB(builder, /*CreateBranch=*/true, "omp.private.init");
+
+ LLVM::ModuleTranslation::SaveStack<OpenMPAllocaStackFrame> frame(
+ moduleTranslation, allocaIP);
+
+ // Allocate and initialize private variables
+ builder.SetInsertPoint(initBlock->getTerminator());
+
+ taskStructMgr.generateTaskContextStruct();
+ taskStructMgr.createGEPsToPrivateVars();
+
+ llvmFirstPrivateVars.resize(privateVarsInfo.blockArgs.size());
+ int index = 0;
+
+ for (auto [privDecl, mlirPrivVar, blockArg, llvmPrivateVarAlloc] :
+ llvm::zip_equal(privateVarsInfo.privatizers, privateVarsInfo.mlirVars,
+ privateVarsInfo.blockArgs,
+ taskStructMgr.getLLVMPrivateVarGEPs())) {
+ // To be handled inside the taskloop.
+ if (!privDecl.readsFromMold())
+ continue;
+ assert(llvmPrivateVarAlloc &&
+ "reads from mold so shouldn't have been skipped");
+
+ llvm::Expected<llvm::Value *> privateVarOrErr =
+ initPrivateVar(builder, moduleTranslation, privDecl, mlirPrivVar,
+ blockArg, llvmPrivateVarAlloc, initBlock);
+ if (!privateVarOrErr)
+ return handleError(privateVarOrErr, *taskloopOp.getOperation());
+
+ llvmFirstPrivateVars[index++] = privateVarOrErr.get();
+
+ llvm::IRBuilderBase::InsertPointGuard guard(builder);
+ builder.SetInsertPoint(builder.GetInsertBlock()->getTerminator());
+
+ if ((privateVarOrErr.get() != llvmPrivateVarAlloc) &&
+ !mlir::isa<LLVM::LLVMPointerType>(blockArg.getType())) {
+ builder.CreateStore(privateVarOrErr.get(), llvmPrivateVarAlloc);
+ // Load it so we have the value pointed to by the GEP
+ llvmPrivateVarAlloc = builder.CreateLoad(privateVarOrErr.get()->getType(),
+ llvmPrivateVarAlloc);
+ }
+ assert(llvmPrivateVarAlloc->getType() ==
+ moduleTranslation.convertType(blockArg.getType()));
+ }
+
+ // firstprivate copy region
+ setInsertPointForPossiblyEmptyBlock(builder, copyBlock);
+ if (failed(copyFirstPrivateVars(
+ taskloopOp, builder, moduleTranslation, privateVarsInfo.mlirVars,
+ taskStructMgr.getLLVMPrivateVarGEPs(), privateVarsInfo.privatizers,
+ taskloopOp.getPrivateNeedsBarrier())))
+ return llvm::failure();
+
+  // Set up insertion point for call to createTaskloop()
+ builder.SetInsertPoint(taskloopStartBlock);
+
+ auto bodyCB = [&](InsertPointTy allocaIP,
+ InsertPointTy codegenIP) -> llvm::Error {
+ // Save the alloca insertion point on ModuleTranslation stack for use in
+ // nested regions.
+ LLVM::ModuleTranslation::SaveStack<OpenMPAllocaStackFrame> frame(
+ moduleTranslation, allocaIP);
+
+ // translate the body of the taskloop:
+ builder.restoreIP(codegenIP);
+
+ llvm::BasicBlock *privInitBlock = nullptr;
+ privateVarsInfo.llvmVars.resize(privateVarsInfo.blockArgs.size());
+ for (auto [i, zip] : llvm::enumerate(llvm::zip_equal(
+ privateVarsInfo.blockArgs, privateVarsInfo.privatizers,
+ privateVarsInfo.mlirVars))) {
+ auto [blockArg, privDecl, mlirPrivVar] = zip;
+ // This is handled before the task executes
+ if (privDecl.readsFromMold())
+ continue;
+
+ llvm::IRBuilderBase::InsertPointGuard guard(builder);
+ llvm::Type *llvmAllocType =
+ moduleTranslation.convertType(privDecl.getType());
+ builder.SetInsertPoint(allocaIP.getBlock()->getTerminator());
+ llvm::Value *llvmPrivateVar = builder.CreateAlloca(
+ llvmAllocType, /*ArraySize=*/nullptr, "omp.private.alloc");
+
+ llvm::Expected<llvm::Value *> privateVarOrError =
+ initPrivateVar(builder, moduleTranslation, privDecl, mlirPrivVar,
+ blockArg, llvmPrivateVar, privInitBlock);
+ if (!privateVarOrError)
+ return privateVarOrError.takeError();
+ moduleTranslation.mapValue(blockArg, privateVarOrError.get());
+ privateVarsInfo.llvmVars[i] = privateVarOrError.get();
+ // Add private var to llvmFirstPrivateVars
+ llvmFirstPrivateVars[index++] = privateVarOrError.get();
+ }
+
+ taskStructMgr.createGEPsToPrivateVars();
+ for (auto [i, llvmPrivVar] :
+ llvm::enumerate(taskStructMgr.getLLVMPrivateVarGEPs())) {
+ if (!llvmPrivVar) {
...
[truncated]
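For orientation, the lowering implemented above boils down to the following call sequence around the outlined task function. This is a hand-written, C-like sketch based on the diff, not code from the patch; the real implementation builds these calls through OpenMPIRBuilder, the kmp_* names belong to the OpenMP runtime, and helper names such as task_lb_ptr are purely illustrative:

    // Implicit taskgroup around the taskloop (nogroup handling is still a TODO).
    __kmpc_taskgroup(loc, gtid);
    // Allocate the task descriptor; flags is 1 for a tied task, 0 for untied.
    kmp_task_t *task = __kmpc_omp_task_alloc(loc, gtid, flags, sizeof_task,
                                             sizeof_shareds, outlined_fn);
    // Store lb/ub/step into the trailing i64 fields (struct indices 5..7),
    // mirroring the CreateStructGEP + store sequence in the diff.
    *task_lb_ptr = lb;   // GEP index 5
    *task_ub_ptr = ub;   // GEP index 6
    *task_st_ptr = step; // GEP index 7
    // The runtime splits [lb, ub] into chunks, duplicating the task per chunk.
    __kmpc_taskloop(loc, gtid, task, /*if_val=*/1, task_lb_ptr, task_ub_ptr,
                    step, /*nogroup=*/1, /*sched=*/0, /*grainsize=*/0,
                    /*task_dup=*/nullptr);
    __kmpc_end_taskgroup(loc, gtid);

At the source level, the construct being translated is, in its C++ form (the Flang path lowers the equivalent Fortran !$omp taskloop; this example is not taken from the patch):

    #pragma omp parallel
    #pragma omp single
    #pragma omp taskloop
    for (int i = lb; i <= ub; i += step)
      work(i);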
|
|
@llvm/pr-subscribers-flang-openmp Author: Kaviya Rajendiran (kaviya2510) ChangesAdded translation support for taskloop construct. Patch is 32.13 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/166903.diff 6 Files Affected:
diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
index b3d7ab4acf303..18828380abd32 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
+++ b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
@@ -1359,6 +1359,22 @@ class OpenMPIRBuilder {
: DepKind(DepKind), DepValueType(DepValueType), DepVal(DepVal) {}
};
+ /// Generator for `#omp taskloop`
+ ///
+ /// \param Loc The location where the taskloop construct was encountered.
+ /// \param AllocaIP The insertion point to be used for alloca instructions.
+ /// \param BodyGenCB Callback that will generate the region code.
+ /// \param LoopInfo Callback that return the CLI
+ /// \param LBVal Lowerbound value of loop
+ /// \param UBVal Upperbound value of loop
+ /// \param StepVal Step value of loop
+ /// \param Tied True if the task is tied, false if the task is untied.
+ LLVM_ABI InsertPointOrErrorTy createTaskloop(
+ const LocationDescription &Loc, InsertPointTy AllocaIP,
+ BodyGenCallbackTy BodyGenCB,
+ llvm::function_ref<llvm::Expected<llvm::CanonicalLoopInfo *>()> LoopInfo,
+ Value *LBVal, Value *UBVal, Value *StepVal, bool Tied = true);
+
/// Generator for `#omp task`
///
/// \param Loc The location where the task construct was encountered.
diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def b/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def
index 46b3d53a4b408..032495dfe9d61 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def
+++ b/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def
@@ -95,6 +95,7 @@ __OMP_STRUCT_TYPE(KernelArgs, __tgt_kernel_arguments, false, Int32, Int32, VoidP
__OMP_STRUCT_TYPE(AsyncInfo, __tgt_async_info, false, Int8Ptr)
__OMP_STRUCT_TYPE(DependInfo, kmp_dep_info, false, SizeTy, SizeTy, Int8)
__OMP_STRUCT_TYPE(Task, kmp_task_ompbuilder_t, false, VoidPtr, VoidPtr, Int32, VoidPtr, VoidPtr)
+__OMP_STRUCT_TYPE(Taskloop, kmp_task_info, false, VoidPtr, VoidPtr, Int32, VoidPtr, VoidPtr, Int64, Int64, Int64)
__OMP_STRUCT_TYPE(ConfigurationEnvironment, ConfigurationEnvironmentTy, false,
Int8, Int8, Int8, Int32, Int32, Int32, Int32, Int32, Int32)
__OMP_STRUCT_TYPE(DynamicEnvironment, DynamicEnvironmentTy, false, Int16)
diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index fff9a815e5368..e88e722b1370e 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -1933,6 +1933,205 @@ static Value *emitTaskDependencies(
return DepArray;
}
+OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createTaskloop(
+ const LocationDescription &Loc, InsertPointTy AllocaIP,
+ BodyGenCallbackTy BodyGenCB,
+ llvm::function_ref<llvm::Expected<llvm::CanonicalLoopInfo *>()> loopInfo,
+ Value *LBVal, Value *UBVal, Value *StepVal, bool Tied) {
+
+ if (!updateToLocation(Loc))
+ return InsertPointTy();
+
+ uint32_t SrcLocStrSize;
+ Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
+ Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
+
+ BasicBlock *TaskloopExitBB =
+ splitBB(Builder, /*CreateBranch=*/true, "taskloop.exit");
+ BasicBlock *TaskloopBodyBB =
+ splitBB(Builder, /*CreateBranch=*/true, "taskloop.body");
+ BasicBlock *TaskloopAllocaBB =
+ splitBB(Builder, /*CreateBranch=*/true, "taskloop.alloca");
+
+ InsertPointTy TaskloopAllocaIP =
+ InsertPointTy(TaskloopAllocaBB, TaskloopAllocaBB->begin());
+ InsertPointTy TaskloopBodyIP =
+ InsertPointTy(TaskloopBodyBB, TaskloopBodyBB->begin());
+
+ if (Error Err = BodyGenCB(TaskloopAllocaIP, TaskloopBodyIP))
+ return Err;
+
+ llvm::Expected<llvm::CanonicalLoopInfo *> result = loopInfo();
+ if (!result) {
+ return result.takeError();
+ }
+
+ llvm::CanonicalLoopInfo *CLI = result.get();
+ OutlineInfo OI;
+ OI.EntryBB = TaskloopAllocaBB;
+ OI.OuterAllocaBB = AllocaIP.getBlock();
+ OI.ExitBB = TaskloopExitBB;
+
+ // Add the thread ID argument.
+ SmallVector<Instruction *, 4> ToBeDeleted;
+ // dummy instruction to be used as a fake argument
+ OI.ExcludeArgsFromAggregate.push_back(createFakeIntVal(
+ Builder, AllocaIP, ToBeDeleted, TaskloopAllocaIP, "global.tid", false));
+
+ OI.PostOutlineCB = [this, Ident, LBVal, UBVal, StepVal, Tied,
+ TaskloopAllocaBB, CLI, Loc,
+ ToBeDeleted](Function &OutlinedFn) mutable {
+ // Replace the Stale CI by appropriate RTL function call.
+ assert(OutlinedFn.hasOneUse() &&
+ "there must be a single user for the outlined function");
+ CallInst *StaleCI = cast<CallInst>(OutlinedFn.user_back());
+
+ // HasShareds is true if any variables are captured in the outlined region,
+ // false otherwise.
+ bool HasShareds = StaleCI->arg_size() > 1;
+ Builder.SetInsertPoint(StaleCI);
+
+ // Gather the arguments for emitting the runtime call for
+ // @__kmpc_omp_task_alloc
+ Function *TaskAllocFn =
+ getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_task_alloc);
+
+ Value *ThreadID = getOrCreateThreadID(Ident);
+
+ // Emit runtime call for @__kmpc_taskgroup
+ Function *TaskgroupFn =
+ getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_taskgroup);
+ Builder.CreateCall(TaskgroupFn, {Ident, ThreadID});
+
+ // The flags are set to 1 if the task is tied, 0 otherwise.
+ Value *Flags = Builder.getInt32(Tied);
+
+ Value *TaskSize = Builder.getInt64(
+ divideCeil(M.getDataLayout().getTypeSizeInBits(Taskloop), 8));
+
+ Value *SharedsSize = Builder.getInt64(0);
+ if (HasShareds) {
+ AllocaInst *ArgStructAlloca =
+ dyn_cast<AllocaInst>(StaleCI->getArgOperand(1));
+ assert(ArgStructAlloca &&
+ "Unable to find the alloca instruction corresponding to arguments "
+ "for extracted function");
+ StructType *ArgStructType =
+ dyn_cast<StructType>(ArgStructAlloca->getAllocatedType());
+ assert(ArgStructType && "Unable to find struct type corresponding to "
+ "arguments for extracted function");
+ SharedsSize =
+ Builder.getInt64(M.getDataLayout().getTypeStoreSize(ArgStructType));
+ }
+
+ // Emit the @__kmpc_omp_task_alloc runtime call
+ // The runtime call returns a pointer to an area where the task captured
+ // variables must be copied before the task is run (TaskData)
+ CallInst *TaskData = Builder.CreateCall(
+ TaskAllocFn, {/*loc_ref=*/Ident, /*gtid=*/ThreadID, /*flags=*/Flags,
+ /*sizeof_task=*/TaskSize, /*sizeof_shared=*/SharedsSize,
+ /*task_func=*/&OutlinedFn});
+
+ // Get the pointer to loop lb, ub, step from task ptr
+ // and set up the lowerbound,upperbound and step values
+ llvm::Value *lb =
+ Builder.CreateStructGEP(OpenMPIRBuilder::Taskloop, TaskData, 5);
+ // Value *LbVal_ext = Builder.CreateSExt(LBVal, Builder.getInt64Ty());
+ Builder.CreateStore(LBVal, lb);
+
+ llvm::Value *ub =
+ Builder.CreateStructGEP(OpenMPIRBuilder::Taskloop, TaskData, 6);
+ Builder.CreateStore(UBVal, ub);
+
+ llvm::Value *step =
+ Builder.CreateStructGEP(OpenMPIRBuilder::Taskloop, TaskData, 7);
+ Value *Step_ext = Builder.CreateSExt(StepVal, Builder.getInt64Ty());
+ Builder.CreateStore(Step_ext, step);
+ llvm::Value *loadstep = Builder.CreateLoad(Builder.getInt64Ty(), step);
+
+ if (HasShareds) {
+ Value *Shareds = StaleCI->getArgOperand(1);
+ Align Alignment = TaskData->getPointerAlignment(M.getDataLayout());
+ Value *TaskShareds = Builder.CreateLoad(VoidPtr, TaskData);
+ Builder.CreateMemCpy(TaskShareds, Alignment, Shareds, Alignment,
+ SharedsSize);
+ }
+
+ // set up the arguments for emitting kmpc_taskloop runtime call
+ // setting default values for ifval, nogroup, sched, grainsize, task_dup
+ Value *IfVal = Builder.getInt32(1);
+ Value *NoGroup = Builder.getInt32(1);
+ Value *Sched = Builder.getInt32(0);
+ Value *GrainSize = Builder.getInt64(0);
+ Value *TaskDup = Constant::getNullValue(Builder.getPtrTy());
+
+ Value *Args[] = {Ident, ThreadID, TaskData, IfVal, lb, ub,
+ loadstep, NoGroup, Sched, GrainSize, TaskDup};
+
+ // taskloop runtime call
+ Function *TaskloopFn =
+ getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_taskloop);
+ Builder.CreateCall(TaskloopFn, Args);
+
+ // Emit the @__kmpc_end_taskgroup runtime call to end the taskgroup
+ Function *EndTaskgroupFn =
+ getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_taskgroup);
+ Builder.CreateCall(EndTaskgroupFn, {Ident, ThreadID});
+
+ StaleCI->eraseFromParent();
+
+ Builder.SetInsertPoint(TaskloopAllocaBB, TaskloopAllocaBB->begin());
+
+ if (HasShareds) {
+ LoadInst *Shareds = Builder.CreateLoad(VoidPtr, OutlinedFn.getArg(1));
+ OutlinedFn.getArg(1)->replaceUsesWithIf(
+ Shareds, [Shareds](Use &U) { return U.getUser() != Shareds; });
+ }
+
+ Value *IV = CLI->getIndVar();
+ Type *IVTy = IV->getType();
+ Constant *One = ConstantInt::get(IVTy, 1);
+
+ Value *task_lb = Builder.CreateStructGEP(OpenMPIRBuilder::Taskloop,
+ OutlinedFn.getArg(1), 5, "gep_lb");
+ Value *LowerBound = Builder.CreateLoad(IVTy, task_lb, "lb");
+
+ Value *task_ub = Builder.CreateStructGEP(OpenMPIRBuilder::Taskloop,
+ OutlinedFn.getArg(1), 6, "gep_ub");
+ Value *UpperBound = Builder.CreateLoad(IVTy, task_ub, "ub");
+
+ Builder.SetInsertPoint(CLI->getPreheader()->getTerminator());
+
+ Value *TripCountMinusOne = Builder.CreateSub(UpperBound, LowerBound);
+ Value *TripCount = Builder.CreateAdd(TripCountMinusOne, One, "trip_cnt");
+ // set the trip count in the CLI
+ CLI->setTripCount(TripCount);
+
+ Builder.SetInsertPoint(CLI->getBody(),
+ CLI->getBody()->getFirstInsertionPt());
+
+ llvm::BasicBlock *Body = CLI->getBody();
+ for (llvm::Instruction &I : *Body) {
+ if (auto *Add = llvm::dyn_cast<llvm::BinaryOperator>(&I)) {
+ if (Add->getOpcode() == llvm::Instruction::Add) {
+ if (llvm::isa<llvm::BinaryOperator>(Add->getOperand(0))) {
+ // update the starting index of the loop
+ Add->setOperand(1, LowerBound);
+ }
+ }
+ }
+ }
+
+ for (Instruction *I : llvm::reverse(ToBeDeleted)) {
+ I->eraseFromParent();
+ }
+ };
+
+ addOutlineInfo(std::move(OI));
+ Builder.SetInsertPoint(TaskloopExitBB, TaskloopExitBB->begin());
+ return Builder.saveIP();
+}
+
OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createTask(
const LocationDescription &Loc, InsertPointTy AllocaIP,
BodyGenCallbackTy BodyGenCB, bool Tied, Value *Final, Value *IfCondition,
diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
index 8edec990eaaba..d69fcd3db0413 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
@@ -323,6 +323,18 @@ static LogicalResult checkImplementationStatus(Operation &op) {
if (op.getDistScheduleChunkSize())
result = todo("dist_schedule with chunk_size");
};
+ auto checkFinal = [&todo](auto op, LogicalResult &result) {
+ if (op.getFinal())
+ result = todo("final");
+ };
+ auto checkGrainsize = [&todo](auto op, LogicalResult &result) {
+ if (op.getGrainsize())
+ result = todo("grainsize");
+ };
+ auto checkIf = [](auto op, LogicalResult &) {
+ if (op.getIfExpr())
+ op.emitWarning("if");
+ };
auto checkHint = [](auto op, LogicalResult &) {
if (op.getHint())
op.emitWarning("hint clause discarded");
@@ -340,10 +352,22 @@ static LogicalResult checkImplementationStatus(Operation &op) {
if (!op.getLinearVars().empty() || !op.getLinearStepVars().empty())
result = todo("linear");
};
+ auto checkMergeable = [&todo](auto op, LogicalResult &result) {
+ if (op.getMergeable())
+ result = todo("mergeable");
+ };
+ auto checkNogroup = [&todo](auto op, LogicalResult &result) {
+ if (op.getNogroup())
+ result = todo("nogroup");
+ };
auto checkNowait = [&todo](auto op, LogicalResult &result) {
if (op.getNowait())
result = todo("nowait");
};
+ auto checkNumTasks = [&todo](auto op, LogicalResult &result) {
+ if (op.getNumTasks())
+ result = todo("num_tasks");
+ };
auto checkOrder = [&todo](auto op, LogicalResult &result) {
if (op.getOrder() || op.getOrderMod())
result = todo("order");
@@ -417,7 +441,15 @@ static LogicalResult checkImplementationStatus(Operation &op) {
checkNowait(op, result);
})
.Case([&](omp::TaskloopOp op) {
- // TODO: Add other clauses check
+ checkAllocate(op, result);
+ checkFinal(op, result);
+ checkGrainsize(op, result);
+ checkIf(op, result);
+ checkInReduction(op, result);
+ checkMergeable(op, result);
+ checkNogroup(op, result);
+ checkNumTasks(op, result);
+ checkReduction(op, result);
checkUntied(op, result);
checkPriority(op, result);
})
@@ -2097,6 +2129,8 @@ class TaskContextStructManager {
/// private decls.
void createGEPsToPrivateVars();
+ llvm::Value *isAllocated();
+
/// De-allocate the task context structure.
void freeStructPtr();
@@ -2177,13 +2211,26 @@ void TaskContextStructManager::createGEPsToPrivateVars() {
}
}
+llvm::Value *TaskContextStructManager::isAllocated() {
+ if (!structPtr)
+ return nullptr;
+
+ return builder.CreateIsNotNull(structPtr);
+}
+
void TaskContextStructManager::freeStructPtr() {
if (!structPtr)
return;
llvm::IRBuilderBase::InsertPointGuard guard{builder};
- // Ensure we don't put the call to free() after the terminator
- builder.SetInsertPoint(builder.GetInsertBlock()->getTerminator());
+ llvm::BasicBlock *currentBlock = builder.GetInsertBlock();
+ if (currentBlock->getTerminator()) {
+ // Ensure we don't put the call to free() after the terminator
+ builder.SetInsertPoint(currentBlock->getTerminator());
+ } else {
+ // Insert the call to free() at the end of the current block
+ builder.SetInsertPoint(currentBlock);
+ }
builder.CreateFree(structPtr);
}
@@ -2419,6 +2466,207 @@ convertOmpTaskOp(omp::TaskOp taskOp, llvm::IRBuilderBase &builder,
return success();
}
+// Converts an OpenMP taskloop construct into LLVM IR using OpenMPIRBuilder.
+static LogicalResult
+convertOmpTaskloopOp(Operation &opInst, llvm::IRBuilderBase &builder,
+ LLVM::ModuleTranslation &moduleTranslation) {
+ using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
+ auto taskloopOp = cast<omp::TaskloopOp>(opInst);
+ if (failed(checkImplementationStatus(opInst)))
+ return failure();
+
+ // It stores the pointer of allocated firstprivate copies,
+ // which can be used later for freeing the allocated space.
+ SmallVector<llvm::Value *> llvmFirstPrivateVars;
+ PrivateVarsInfo privateVarsInfo(taskloopOp);
+ TaskContextStructManager taskStructMgr{builder, moduleTranslation,
+ privateVarsInfo.privatizers};
+
+ llvm::OpenMPIRBuilder::InsertPointTy allocaIP =
+ findAllocaInsertPoint(builder, moduleTranslation);
+
+ assert(builder.GetInsertPoint() == builder.GetInsertBlock()->end());
+ llvm::BasicBlock *taskloopStartBlock = llvm::BasicBlock::Create(
+ builder.getContext(), "omp.taskloop.start",
+ /*Parent=*/builder.GetInsertBlock()->getParent());
+ llvm::Instruction *branchToTaskloopStartBlock =
+ builder.CreateBr(taskloopStartBlock);
+ builder.SetInsertPoint(branchToTaskloopStartBlock);
+
+ llvm::BasicBlock *copyBlock =
+ splitBB(builder, /*CreateBranch=*/true, "omp.private.copy");
+ llvm::BasicBlock *initBlock =
+ splitBB(builder, /*CreateBranch=*/true, "omp.private.init");
+
+ LLVM::ModuleTranslation::SaveStack<OpenMPAllocaStackFrame> frame(
+ moduleTranslation, allocaIP);
+
+ // Allocate and initialize private variables
+ builder.SetInsertPoint(initBlock->getTerminator());
+
+ taskStructMgr.generateTaskContextStruct();
+ taskStructMgr.createGEPsToPrivateVars();
+
+ llvmFirstPrivateVars.resize(privateVarsInfo.blockArgs.size());
+ int index = 0;
+
+ for (auto [privDecl, mlirPrivVar, blockArg, llvmPrivateVarAlloc] :
+ llvm::zip_equal(privateVarsInfo.privatizers, privateVarsInfo.mlirVars,
+ privateVarsInfo.blockArgs,
+ taskStructMgr.getLLVMPrivateVarGEPs())) {
+ // To be handled inside the taskloop.
+ if (!privDecl.readsFromMold())
+ continue;
+ assert(llvmPrivateVarAlloc &&
+ "reads from mold so shouldn't have been skipped");
+
+ llvm::Expected<llvm::Value *> privateVarOrErr =
+ initPrivateVar(builder, moduleTranslation, privDecl, mlirPrivVar,
+ blockArg, llvmPrivateVarAlloc, initBlock);
+ if (!privateVarOrErr)
+ return handleError(privateVarOrErr, *taskloopOp.getOperation());
+
+ llvmFirstPrivateVars[index++] = privateVarOrErr.get();
+
+ llvm::IRBuilderBase::InsertPointGuard guard(builder);
+ builder.SetInsertPoint(builder.GetInsertBlock()->getTerminator());
+
+ if ((privateVarOrErr.get() != llvmPrivateVarAlloc) &&
+ !mlir::isa<LLVM::LLVMPointerType>(blockArg.getType())) {
+ builder.CreateStore(privateVarOrErr.get(), llvmPrivateVarAlloc);
+ // Load it so we have the value pointed to by the GEP
+ llvmPrivateVarAlloc = builder.CreateLoad(privateVarOrErr.get()->getType(),
+ llvmPrivateVarAlloc);
+ }
+ assert(llvmPrivateVarAlloc->getType() ==
+ moduleTranslation.convertType(blockArg.getType()));
+ }
+
+ // firstprivate copy region
+ setInsertPointForPossiblyEmptyBlock(builder, copyBlock);
+ if (failed(copyFirstPrivateVars(
+ taskloopOp, builder, moduleTranslation, privateVarsInfo.mlirVars,
+ taskStructMgr.getLLVMPrivateVarGEPs(), privateVarsInfo.privatizers,
+ taskloopOp.getPrivateNeedsBarrier())))
+ return llvm::failure();
+
+ // Set up inserttion point for call to createTaskloop()
+ builder.SetInsertPoint(taskloopStartBlock);
+
+ auto bodyCB = [&](InsertPointTy allocaIP,
+ InsertPointTy codegenIP) -> llvm::Error {
+ // Save the alloca insertion point on ModuleTranslation stack for use in
+ // nested regions.
+ LLVM::ModuleTranslation::SaveStack<OpenMPAllocaStackFrame> frame(
+ moduleTranslation, allocaIP);
+
+ // translate the body of the taskloop:
+ builder.restoreIP(codegenIP);
+
+ llvm::BasicBlock *privInitBlock = nullptr;
+ privateVarsInfo.llvmVars.resize(privateVarsInfo.blockArgs.size());
+ for (auto [i, zip] : llvm::enumerate(llvm::zip_equal(
+ privateVarsInfo.blockArgs, privateVarsInfo.privatizers,
+ privateVarsInfo.mlirVars))) {
+ auto [blockArg, privDecl, mlirPrivVar] = zip;
+ // This is handled before the task executes
+ if (privDecl.readsFromMold())
+ continue;
+
+ llvm::IRBuilderBase::InsertPointGuard guard(builder);
+ llvm::Type *llvmAllocType =
+ moduleTranslation.convertType(privDecl.getType());
+ builder.SetInsertPoint(allocaIP.getBlock()->getTerminator());
+ llvm::Value *llvmPrivateVar = builder.CreateAlloca(
+ llvmAllocType, /*ArraySize=*/nullptr, "omp.private.alloc");
+
+ llvm::Expected<llvm::Value *> privateVarOrError =
+ initPrivateVar(builder, moduleTranslation, privDecl, mlirPrivVar,
+ blockArg, llvmPrivateVar, privInitBlock);
+ if (!privateVarOrError)
+ return privateVarOrError.takeError();
+ moduleTranslation.mapValue(blockArg, privateVarOrError.get());
+ privateVarsInfo.llvmVars[i] = privateVarOrError.get();
+ // Add private var to llvmFirstPrivateVars
+ llvmFirstPrivateVars[index++] = privateVarOrError.get();
+ }
+
+ taskStructMgr.createGEPsToPrivateVars();
+ for (auto [i, llvmPrivVar] :
+ llvm::enumerate(taskStructMgr.getLLVMPrivateVarGEPs())) {
+ if (!llvmPrivVar) {
...
[truncated]
|
|
@llvm/pr-subscribers-mlir Author: Kaviya Rajendiran (kaviya2510) ChangesAdded translation support for taskloop construct. Patch is 32.13 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/166903.diff 6 Files Affected:
diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
index b3d7ab4acf303..18828380abd32 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
+++ b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
@@ -1359,6 +1359,22 @@ class OpenMPIRBuilder {
: DepKind(DepKind), DepValueType(DepValueType), DepVal(DepVal) {}
};
+ /// Generator for `#omp taskloop`
+ ///
+ /// \param Loc The location where the taskloop construct was encountered.
+ /// \param AllocaIP The insertion point to be used for alloca instructions.
+ /// \param BodyGenCB Callback that will generate the region code.
+ /// \param LoopInfo Callback that return the CLI
+ /// \param LBVal Lowerbound value of loop
+ /// \param UBVal Upperbound value of loop
+ /// \param StepVal Step value of loop
+ /// \param Tied True if the task is tied, false if the task is untied.
+ LLVM_ABI InsertPointOrErrorTy createTaskloop(
+ const LocationDescription &Loc, InsertPointTy AllocaIP,
+ BodyGenCallbackTy BodyGenCB,
+ llvm::function_ref<llvm::Expected<llvm::CanonicalLoopInfo *>()> LoopInfo,
+ Value *LBVal, Value *UBVal, Value *StepVal, bool Tied = true);
+
/// Generator for `#omp task`
///
/// \param Loc The location where the task construct was encountered.
diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def b/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def
index 46b3d53a4b408..032495dfe9d61 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def
+++ b/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def
@@ -95,6 +95,7 @@ __OMP_STRUCT_TYPE(KernelArgs, __tgt_kernel_arguments, false, Int32, Int32, VoidP
__OMP_STRUCT_TYPE(AsyncInfo, __tgt_async_info, false, Int8Ptr)
__OMP_STRUCT_TYPE(DependInfo, kmp_dep_info, false, SizeTy, SizeTy, Int8)
__OMP_STRUCT_TYPE(Task, kmp_task_ompbuilder_t, false, VoidPtr, VoidPtr, Int32, VoidPtr, VoidPtr)
+__OMP_STRUCT_TYPE(Taskloop, kmp_task_info, false, VoidPtr, VoidPtr, Int32, VoidPtr, VoidPtr, Int64, Int64, Int64)
__OMP_STRUCT_TYPE(ConfigurationEnvironment, ConfigurationEnvironmentTy, false,
Int8, Int8, Int8, Int32, Int32, Int32, Int32, Int32, Int32)
__OMP_STRUCT_TYPE(DynamicEnvironment, DynamicEnvironmentTy, false, Int16)
diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index fff9a815e5368..e88e722b1370e 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -1933,6 +1933,205 @@ static Value *emitTaskDependencies(
return DepArray;
}
+OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createTaskloop(
+ const LocationDescription &Loc, InsertPointTy AllocaIP,
+ BodyGenCallbackTy BodyGenCB,
+ llvm::function_ref<llvm::Expected<llvm::CanonicalLoopInfo *>()> loopInfo,
+ Value *LBVal, Value *UBVal, Value *StepVal, bool Tied) {
+
+ if (!updateToLocation(Loc))
+ return InsertPointTy();
+
+ uint32_t SrcLocStrSize;
+ Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
+ Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
+
+ BasicBlock *TaskloopExitBB =
+ splitBB(Builder, /*CreateBranch=*/true, "taskloop.exit");
+ BasicBlock *TaskloopBodyBB =
+ splitBB(Builder, /*CreateBranch=*/true, "taskloop.body");
+ BasicBlock *TaskloopAllocaBB =
+ splitBB(Builder, /*CreateBranch=*/true, "taskloop.alloca");
+
+ InsertPointTy TaskloopAllocaIP =
+ InsertPointTy(TaskloopAllocaBB, TaskloopAllocaBB->begin());
+ InsertPointTy TaskloopBodyIP =
+ InsertPointTy(TaskloopBodyBB, TaskloopBodyBB->begin());
+
+ if (Error Err = BodyGenCB(TaskloopAllocaIP, TaskloopBodyIP))
+ return Err;
+
+ llvm::Expected<llvm::CanonicalLoopInfo *> result = loopInfo();
+ if (!result) {
+ return result.takeError();
+ }
+
+ llvm::CanonicalLoopInfo *CLI = result.get();
+ OutlineInfo OI;
+ OI.EntryBB = TaskloopAllocaBB;
+ OI.OuterAllocaBB = AllocaIP.getBlock();
+ OI.ExitBB = TaskloopExitBB;
+
+ // Add the thread ID argument.
+ SmallVector<Instruction *, 4> ToBeDeleted;
+ // dummy instruction to be used as a fake argument
+ OI.ExcludeArgsFromAggregate.push_back(createFakeIntVal(
+ Builder, AllocaIP, ToBeDeleted, TaskloopAllocaIP, "global.tid", false));
+
+ OI.PostOutlineCB = [this, Ident, LBVal, UBVal, StepVal, Tied,
+ TaskloopAllocaBB, CLI, Loc,
+ ToBeDeleted](Function &OutlinedFn) mutable {
+ // Replace the Stale CI by appropriate RTL function call.
+ assert(OutlinedFn.hasOneUse() &&
+ "there must be a single user for the outlined function");
+ CallInst *StaleCI = cast<CallInst>(OutlinedFn.user_back());
+
+ // HasShareds is true if any variables are captured in the outlined region,
+ // false otherwise.
+ bool HasShareds = StaleCI->arg_size() > 1;
+ Builder.SetInsertPoint(StaleCI);
+
+ // Gather the arguments for emitting the runtime call for
+ // @__kmpc_omp_task_alloc
+ Function *TaskAllocFn =
+ getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_task_alloc);
+
+ Value *ThreadID = getOrCreateThreadID(Ident);
+
+ // Emit runtime call for @__kmpc_taskgroup
+ Function *TaskgroupFn =
+ getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_taskgroup);
+ Builder.CreateCall(TaskgroupFn, {Ident, ThreadID});
+
+ // The flags are set to 1 if the task is tied, 0 otherwise.
+ Value *Flags = Builder.getInt32(Tied);
+
+ Value *TaskSize = Builder.getInt64(
+ divideCeil(M.getDataLayout().getTypeSizeInBits(Taskloop), 8));
+
+ Value *SharedsSize = Builder.getInt64(0);
+ if (HasShareds) {
+ AllocaInst *ArgStructAlloca =
+ dyn_cast<AllocaInst>(StaleCI->getArgOperand(1));
+ assert(ArgStructAlloca &&
+ "Unable to find the alloca instruction corresponding to arguments "
+ "for extracted function");
+ StructType *ArgStructType =
+ dyn_cast<StructType>(ArgStructAlloca->getAllocatedType());
+ assert(ArgStructType && "Unable to find struct type corresponding to "
+ "arguments for extracted function");
+ SharedsSize =
+ Builder.getInt64(M.getDataLayout().getTypeStoreSize(ArgStructType));
+ }
+
+ // Emit the @__kmpc_omp_task_alloc runtime call
+ // The runtime call returns a pointer to an area where the task captured
+ // variables must be copied before the task is run (TaskData)
+ CallInst *TaskData = Builder.CreateCall(
+ TaskAllocFn, {/*loc_ref=*/Ident, /*gtid=*/ThreadID, /*flags=*/Flags,
+ /*sizeof_task=*/TaskSize, /*sizeof_shared=*/SharedsSize,
+ /*task_func=*/&OutlinedFn});
+
+ // Get the pointer to loop lb, ub, step from task ptr
+ // and set up the lowerbound,upperbound and step values
+ llvm::Value *lb =
+ Builder.CreateStructGEP(OpenMPIRBuilder::Taskloop, TaskData, 5);
+ // Value *LbVal_ext = Builder.CreateSExt(LBVal, Builder.getInt64Ty());
+ Builder.CreateStore(LBVal, lb);
+
+ llvm::Value *ub =
+ Builder.CreateStructGEP(OpenMPIRBuilder::Taskloop, TaskData, 6);
+ Builder.CreateStore(UBVal, ub);
+
+ llvm::Value *step =
+ Builder.CreateStructGEP(OpenMPIRBuilder::Taskloop, TaskData, 7);
+ Value *Step_ext = Builder.CreateSExt(StepVal, Builder.getInt64Ty());
+ Builder.CreateStore(Step_ext, step);
+ llvm::Value *loadstep = Builder.CreateLoad(Builder.getInt64Ty(), step);
+
+ if (HasShareds) {
+ Value *Shareds = StaleCI->getArgOperand(1);
+ Align Alignment = TaskData->getPointerAlignment(M.getDataLayout());
+ Value *TaskShareds = Builder.CreateLoad(VoidPtr, TaskData);
+ Builder.CreateMemCpy(TaskShareds, Alignment, Shareds, Alignment,
+ SharedsSize);
+ }
+
+ // set up the arguments for emitting kmpc_taskloop runtime call
+ // setting default values for ifval, nogroup, sched, grainsize, task_dup
+ Value *IfVal = Builder.getInt32(1);
+ Value *NoGroup = Builder.getInt32(1);
+ Value *Sched = Builder.getInt32(0);
+ Value *GrainSize = Builder.getInt64(0);
+ Value *TaskDup = Constant::getNullValue(Builder.getPtrTy());
+
+ Value *Args[] = {Ident, ThreadID, TaskData, IfVal, lb, ub,
+ loadstep, NoGroup, Sched, GrainSize, TaskDup};
+
+ // taskloop runtime call
+ Function *TaskloopFn =
+ getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_taskloop);
+ Builder.CreateCall(TaskloopFn, Args);
+
+ // Emit the @__kmpc_end_taskgroup runtime call to end the taskgroup
+ Function *EndTaskgroupFn =
+ getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_taskgroup);
+ Builder.CreateCall(EndTaskgroupFn, {Ident, ThreadID});
+
+ StaleCI->eraseFromParent();
+
+ Builder.SetInsertPoint(TaskloopAllocaBB, TaskloopAllocaBB->begin());
+
+ if (HasShareds) {
+ LoadInst *Shareds = Builder.CreateLoad(VoidPtr, OutlinedFn.getArg(1));
+ OutlinedFn.getArg(1)->replaceUsesWithIf(
+ Shareds, [Shareds](Use &U) { return U.getUser() != Shareds; });
+ }
+
+ Value *IV = CLI->getIndVar();
+ Type *IVTy = IV->getType();
+ Constant *One = ConstantInt::get(IVTy, 1);
+
+ Value *task_lb = Builder.CreateStructGEP(OpenMPIRBuilder::Taskloop,
+ OutlinedFn.getArg(1), 5, "gep_lb");
+ Value *LowerBound = Builder.CreateLoad(IVTy, task_lb, "lb");
+
+ Value *task_ub = Builder.CreateStructGEP(OpenMPIRBuilder::Taskloop,
+ OutlinedFn.getArg(1), 6, "gep_ub");
+ Value *UpperBound = Builder.CreateLoad(IVTy, task_ub, "ub");
+
+ Builder.SetInsertPoint(CLI->getPreheader()->getTerminator());
+
+ Value *TripCountMinusOne = Builder.CreateSub(UpperBound, LowerBound);
+ Value *TripCount = Builder.CreateAdd(TripCountMinusOne, One, "trip_cnt");
+ // set the trip count in the CLI
+ CLI->setTripCount(TripCount);
+
+ Builder.SetInsertPoint(CLI->getBody(),
+ CLI->getBody()->getFirstInsertionPt());
+
+ llvm::BasicBlock *Body = CLI->getBody();
+ for (llvm::Instruction &I : *Body) {
+ if (auto *Add = llvm::dyn_cast<llvm::BinaryOperator>(&I)) {
+ if (Add->getOpcode() == llvm::Instruction::Add) {
+ if (llvm::isa<llvm::BinaryOperator>(Add->getOperand(0))) {
+ // update the starting index of the loop
+ Add->setOperand(1, LowerBound);
+ }
+ }
+ }
+ }
+
+ for (Instruction *I : llvm::reverse(ToBeDeleted)) {
+ I->eraseFromParent();
+ }
+ };
+
+ addOutlineInfo(std::move(OI));
+ Builder.SetInsertPoint(TaskloopExitBB, TaskloopExitBB->begin());
+ return Builder.saveIP();
+}
+
OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createTask(
const LocationDescription &Loc, InsertPointTy AllocaIP,
BodyGenCallbackTy BodyGenCB, bool Tied, Value *Final, Value *IfCondition,
diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
index 8edec990eaaba..d69fcd3db0413 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
@@ -323,6 +323,18 @@ static LogicalResult checkImplementationStatus(Operation &op) {
if (op.getDistScheduleChunkSize())
result = todo("dist_schedule with chunk_size");
};
+ auto checkFinal = [&todo](auto op, LogicalResult &result) {
+ if (op.getFinal())
+ result = todo("final");
+ };
+ auto checkGrainsize = [&todo](auto op, LogicalResult &result) {
+ if (op.getGrainsize())
+ result = todo("grainsize");
+ };
+ auto checkIf = [](auto op, LogicalResult &) {
+ if (op.getIfExpr())
+ op.emitWarning("if");
+ };
auto checkHint = [](auto op, LogicalResult &) {
if (op.getHint())
op.emitWarning("hint clause discarded");
@@ -340,10 +352,22 @@ static LogicalResult checkImplementationStatus(Operation &op) {
if (!op.getLinearVars().empty() || !op.getLinearStepVars().empty())
result = todo("linear");
};
+ auto checkMergeable = [&todo](auto op, LogicalResult &result) {
+ if (op.getMergeable())
+ result = todo("mergeable");
+ };
+ auto checkNogroup = [&todo](auto op, LogicalResult &result) {
+ if (op.getNogroup())
+ result = todo("nogroup");
+ };
auto checkNowait = [&todo](auto op, LogicalResult &result) {
if (op.getNowait())
result = todo("nowait");
};
+ auto checkNumTasks = [&todo](auto op, LogicalResult &result) {
+ if (op.getNumTasks())
+ result = todo("num_tasks");
+ };
auto checkOrder = [&todo](auto op, LogicalResult &result) {
if (op.getOrder() || op.getOrderMod())
result = todo("order");
@@ -417,7 +441,15 @@ static LogicalResult checkImplementationStatus(Operation &op) {
checkNowait(op, result);
})
.Case([&](omp::TaskloopOp op) {
- // TODO: Add other clauses check
+ checkAllocate(op, result);
+ checkFinal(op, result);
+ checkGrainsize(op, result);
+ checkIf(op, result);
+ checkInReduction(op, result);
+ checkMergeable(op, result);
+ checkNogroup(op, result);
+ checkNumTasks(op, result);
+ checkReduction(op, result);
checkUntied(op, result);
checkPriority(op, result);
})
@@ -2097,6 +2129,8 @@ class TaskContextStructManager {
/// private decls.
void createGEPsToPrivateVars();
+ llvm::Value *isAllocated();
+
/// De-allocate the task context structure.
void freeStructPtr();
@@ -2177,13 +2211,26 @@ void TaskContextStructManager::createGEPsToPrivateVars() {
}
}
+llvm::Value *TaskContextStructManager::isAllocated() {
+ if (!structPtr)
+ return nullptr;
+
+ return builder.CreateIsNotNull(structPtr);
+}
+
void TaskContextStructManager::freeStructPtr() {
if (!structPtr)
return;
llvm::IRBuilderBase::InsertPointGuard guard{builder};
- // Ensure we don't put the call to free() after the terminator
- builder.SetInsertPoint(builder.GetInsertBlock()->getTerminator());
+ llvm::BasicBlock *currentBlock = builder.GetInsertBlock();
+ if (currentBlock->getTerminator()) {
+ // Ensure we don't put the call to free() after the terminator
+ builder.SetInsertPoint(currentBlock->getTerminator());
+ } else {
+ // Insert the call to free() at the end of the current block
+ builder.SetInsertPoint(currentBlock);
+ }
builder.CreateFree(structPtr);
}
@@ -2419,6 +2466,207 @@ convertOmpTaskOp(omp::TaskOp taskOp, llvm::IRBuilderBase &builder,
return success();
}
+// Converts an OpenMP taskloop construct into LLVM IR using OpenMPIRBuilder.
+static LogicalResult
+convertOmpTaskloopOp(Operation &opInst, llvm::IRBuilderBase &builder,
+ LLVM::ModuleTranslation &moduleTranslation) {
+ using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
+ auto taskloopOp = cast<omp::TaskloopOp>(opInst);
+ if (failed(checkImplementationStatus(opInst)))
+ return failure();
+
+ // It stores the pointer of allocated firstprivate copies,
+ // which can be used later for freeing the allocated space.
+ SmallVector<llvm::Value *> llvmFirstPrivateVars;
+ PrivateVarsInfo privateVarsInfo(taskloopOp);
+ TaskContextStructManager taskStructMgr{builder, moduleTranslation,
+ privateVarsInfo.privatizers};
+
+ llvm::OpenMPIRBuilder::InsertPointTy allocaIP =
+ findAllocaInsertPoint(builder, moduleTranslation);
+
+ assert(builder.GetInsertPoint() == builder.GetInsertBlock()->end());
+ llvm::BasicBlock *taskloopStartBlock = llvm::BasicBlock::Create(
+ builder.getContext(), "omp.taskloop.start",
+ /*Parent=*/builder.GetInsertBlock()->getParent());
+ llvm::Instruction *branchToTaskloopStartBlock =
+ builder.CreateBr(taskloopStartBlock);
+ builder.SetInsertPoint(branchToTaskloopStartBlock);
+
+ llvm::BasicBlock *copyBlock =
+ splitBB(builder, /*CreateBranch=*/true, "omp.private.copy");
+ llvm::BasicBlock *initBlock =
+ splitBB(builder, /*CreateBranch=*/true, "omp.private.init");
+
+ LLVM::ModuleTranslation::SaveStack<OpenMPAllocaStackFrame> frame(
+ moduleTranslation, allocaIP);
+
+ // Allocate and initialize private variables
+ builder.SetInsertPoint(initBlock->getTerminator());
+
+ taskStructMgr.generateTaskContextStruct();
+ taskStructMgr.createGEPsToPrivateVars();
+
+ llvmFirstPrivateVars.resize(privateVarsInfo.blockArgs.size());
+ int index = 0;
+
+ for (auto [privDecl, mlirPrivVar, blockArg, llvmPrivateVarAlloc] :
+ llvm::zip_equal(privateVarsInfo.privatizers, privateVarsInfo.mlirVars,
+ privateVarsInfo.blockArgs,
+ taskStructMgr.getLLVMPrivateVarGEPs())) {
+ // To be handled inside the taskloop.
+ if (!privDecl.readsFromMold())
+ continue;
+ assert(llvmPrivateVarAlloc &&
+ "reads from mold so shouldn't have been skipped");
+
+ llvm::Expected<llvm::Value *> privateVarOrErr =
+ initPrivateVar(builder, moduleTranslation, privDecl, mlirPrivVar,
+ blockArg, llvmPrivateVarAlloc, initBlock);
+ if (!privateVarOrErr)
+ return handleError(privateVarOrErr, *taskloopOp.getOperation());
+
+ llvmFirstPrivateVars[index++] = privateVarOrErr.get();
+
+ llvm::IRBuilderBase::InsertPointGuard guard(builder);
+ builder.SetInsertPoint(builder.GetInsertBlock()->getTerminator());
+
+ if ((privateVarOrErr.get() != llvmPrivateVarAlloc) &&
+ !mlir::isa<LLVM::LLVMPointerType>(blockArg.getType())) {
+ builder.CreateStore(privateVarOrErr.get(), llvmPrivateVarAlloc);
+ // Load it so we have the value pointed to by the GEP
+ llvmPrivateVarAlloc = builder.CreateLoad(privateVarOrErr.get()->getType(),
+ llvmPrivateVarAlloc);
+ }
+ assert(llvmPrivateVarAlloc->getType() ==
+ moduleTranslation.convertType(blockArg.getType()));
+ }
+
+ // firstprivate copy region
+ setInsertPointForPossiblyEmptyBlock(builder, copyBlock);
+ if (failed(copyFirstPrivateVars(
+ taskloopOp, builder, moduleTranslation, privateVarsInfo.mlirVars,
+ taskStructMgr.getLLVMPrivateVarGEPs(), privateVarsInfo.privatizers,
+ taskloopOp.getPrivateNeedsBarrier())))
+ return llvm::failure();
+
+ // Set up inserttion point for call to createTaskloop()
+ builder.SetInsertPoint(taskloopStartBlock);
+
+ auto bodyCB = [&](InsertPointTy allocaIP,
+ InsertPointTy codegenIP) -> llvm::Error {
+ // Save the alloca insertion point on ModuleTranslation stack for use in
+ // nested regions.
+ LLVM::ModuleTranslation::SaveStack<OpenMPAllocaStackFrame> frame(
+ moduleTranslation, allocaIP);
+
+ // translate the body of the taskloop:
+ builder.restoreIP(codegenIP);
+
+ llvm::BasicBlock *privInitBlock = nullptr;
+ privateVarsInfo.llvmVars.resize(privateVarsInfo.blockArgs.size());
+ for (auto [i, zip] : llvm::enumerate(llvm::zip_equal(
+ privateVarsInfo.blockArgs, privateVarsInfo.privatizers,
+ privateVarsInfo.mlirVars))) {
+ auto [blockArg, privDecl, mlirPrivVar] = zip;
+ // This is handled before the task executes
+ if (privDecl.readsFromMold())
+ continue;
+
+ llvm::IRBuilderBase::InsertPointGuard guard(builder);
+ llvm::Type *llvmAllocType =
+ moduleTranslation.convertType(privDecl.getType());
+ builder.SetInsertPoint(allocaIP.getBlock()->getTerminator());
+ llvm::Value *llvmPrivateVar = builder.CreateAlloca(
+ llvmAllocType, /*ArraySize=*/nullptr, "omp.private.alloc");
+
+ llvm::Expected<llvm::Value *> privateVarOrError =
+ initPrivateVar(builder, moduleTranslation, privDecl, mlirPrivVar,
+ blockArg, llvmPrivateVar, privInitBlock);
+ if (!privateVarOrError)
+ return privateVarOrError.takeError();
+ moduleTranslation.mapValue(blockArg, privateVarOrError.get());
+ privateVarsInfo.llvmVars[i] = privateVarOrError.get();
+ // Add private var to llvmFirstPrivateVars
+ llvmFirstPrivateVars[index++] = privateVarOrError.get();
+ }
+
+ taskStructMgr.createGEPsToPrivateVars();
+ for (auto [i, llvmPrivVar] :
+ llvm::enumerate(taskStructMgr.getLLVMPrivateVarGEPs())) {
+ if (!llvmPrivVar) {
...
[truncated]
tblah
left a comment
Thank you for posting this Kaviya.
I have a few questions but for the most part I think this should be good to go (with the extra todo message I mention in one of the comments). I understand that you don't have time to work on this immediately so I would suggest merging this without much modification and then improvements can be made in smaller followup patches.
  if (auto *Add = llvm::dyn_cast<llvm::BinaryOperator>(&I)) {
    if (Add->getOpcode() == llvm::Instruction::Add) {
      if (llvm::isa<llvm::BinaryOperator>(Add->getOperand(0))) {
        // update the starting index of the loop
        Add->setOperand(1, LowerBound);
      }
    }
  }
Why exactly is this needed? I'm worried that this might match other Add instructions.
Yes, I agree with your comment that it might match other Add instructions. The reason behind this change is that the taskloop construct divides the loop iterations into chunks and each chunk is executed as an explicit task. The loop bounds (lower bound, upper bound and step) for these chunks are computed by the runtime function __kmpc_taskloop(...), so we need to update the loop nest with the bounds returned by the runtime.
The current loop-nest translation sets the global loop bounds. This change ensures that the loop bounds are adjusted according to the values returned by the runtime.
I explored several alternative approaches to update the loop bounds based on the runtime values, but none of them worked. Also, the loop-nest translation has already happened by this stage, which is where the runtime loop bound values are returned.
Could you share your thoughts if you have a better suggestion for handling this scenario?
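As a point of reference, here is a minimal, self-contained sketch of the chunking behaviour being described. The names (TaskDesc, runTaskloop, taskEntry) are hypothetical stand-ins rather than real runtime APIs, and it only handles a positive step; the point is that each chunk task must read its bounds from the task descriptor rather than from the original loop-nest constants.

// Hypothetical sketch: mimics how a taskloop-style runtime splits the
// iteration space into chunks and hands each task entry per-chunk bounds.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <functional>

struct TaskDesc {            // stands in for the lb/ub/st tail of the task
  int64_t lb, ub, st;
};

// "Runtime": split [lb, ub] into chunks of `grain` iterations and invoke the
// outlined task entry once per chunk, with that chunk's bounds written into
// the task descriptor.
void runTaskloop(int64_t lb, int64_t ub, int64_t st, int64_t grain,
                 const std::function<void(TaskDesc &)> &taskEntry) {
  for (int64_t chunkLb = lb; chunkLb <= ub; chunkLb += grain * st) {
    TaskDesc task{chunkLb, std::min(chunkLb + (grain - 1) * st, ub), st};
    taskEntry(task); // a real runtime would schedule this as an explicit task
  }
}

int main() {
  runTaskloop(/*lb=*/1, /*ub=*/10, /*st=*/1, /*grain=*/4, [](TaskDesc &t) {
    for (int64_t i = t.lb; i <= t.ub; i += t.st)
      std::printf("iteration %lld\n", static_cast<long long>(i));
  });
}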
What is different about this compared to distribute or wsloop (where the runtime also sets the loop bounds)?
I recall reviewing the wsloop implementation while adding this support. If I remember correctly, I could not handle it in the same way because of the outlined function in taskloop (although I am not completely sure about that). I will check it again and get back to you.
  };
  auto checkIf = [](auto op, LogicalResult &) {
    if (op.getIfExpr())
      op.emitWarning("if");
Why is this a warning instead of using todo()?
Apologies, it should be a TODO check. Let me update it in a follow-up patch.
  Value *NoGroup = Builder.getInt32(1);
  Value *Sched = Builder.getInt32(0);
  Value *GrainSize = Builder.getInt64(0);
  Value *TaskDup = Constant::getNullValue(Builder.getPtrTy());
Taskloop works internally by duplicating the current task recursively until there are the right number of tasks to execute the loop with the requested degree of parallelism. I've only skimmed the code but I think it makes a direct copy of the task data structure allocated by the OpenMP runtime.
I wonder if we will need to use the task dup function for duplicating the task context structure allocated in OpenMPToLLVMIRConversion. Otherwise all of the tasks will share the same pointer to the same task context structure.
Fixing this will take some work, so I suggest we leave it as it is and land this patch emitting a TODO when the task context structure isn't empty. Then someone can fix this in a follow-up patch.
Does that sound okay to you?
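To make the sharing problem concrete, here is a minimal hypothetical sketch (the Task/TaskContext types and the (dest, src, lastprivate) callback shape are assumptions for illustration, not the runtime's real layout): without a duplication hook, every duplicated task would carry the same context pointer, and the first free would leave the others dangling.

// Hypothetical sketch of a task-duplication callback deep-copying the task
// context structure so that every chunk task owns (and frees) its own copy.
#include <cstdint>
#include <cstdlib>
#include <cstring>

struct TaskContext { int64_t firstprivateCopy; };

struct Task {
  TaskContext *ctx; // stands in for %omp.task.context_ptr
};

// Assumed shape of the dup hook: copy the compiler-managed payload of `src`
// into the freshly duplicated `dest` task.
void taskContextDup(Task *dest, Task *src, int /*lastprivate*/) {
  dest->ctx = static_cast<TaskContext *>(std::malloc(sizeof(TaskContext)));
  std::memcpy(dest->ctx, src->ctx, sizeof(TaskContext));
}

int main() {
  Task original{static_cast<TaskContext *>(std::malloc(sizeof(TaskContext)))};
  original.ctx->firstprivateCopy = 42;

  Task chunk{};
  taskContextDup(&chunk, &original, /*lastprivate=*/0);

  // Each task frees only its own context, so no double free can occur.
  std::free(chunk.ctx);
  std::free(original.ctx);
  return 0;
}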
Sure, I will add a TODO for it.
// CHECK: br label %taskloop.exit

// CHECK: taskloop.exit:
// CHECK: tail call void @free(ptr %[[OMP_TASK_CONTEXT_PTR]])
A note to whoever implements nogroup:
The location of this free is valid because end_taskgroup waits until all generated tasks are complete before returning.
If end_taskgroup is not called, some other mechanism will have to be used to ensure that this free is not called until every thread has completed execution.
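For anyone picking that up, a rough, self-contained illustration of the ordering constraint (mock functions only, not the real libomp entry points): the free is safe only because the end-of-taskgroup wait happens first; with nogroup it would have to be deferred until after the last task completes.

// Hypothetical sketch of the call ordering that makes the free safe.
#include <cstdio>

void taskgroupBegin()  { std::puts("begin taskgroup"); }
void taskloopEnqueue() { std::puts("enqueue chunk tasks (still running)"); }
void taskgroupEnd()    { std::puts("wait until every chunk task has finished"); }

int main() {
  void *taskContext = ::operator new(64); // stands in for %omp.task.context_ptr
  taskgroupBegin();
  taskloopEnqueue();               // chunk tasks may still read taskContext
  taskgroupEnd();                  // blocks until all chunk tasks complete
  ::operator delete(taskContext);  // safe: no task can reference it any more
  // With nogroup there is no such wait, so the free would have to be tied to
  // task completion instead (e.g. performed by the last finishing task).
  return 0;
}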
Stylie777
left a comment
Thanks for the patch Kaviya!
I have a few comments but they do not block merging of this patch. The only thing I would like to see is TODO tests for all the TODO messages that have been added. If we need to add those in a follow-on patch, then that's ok too.
  // dummy check to ensure that the task context structure is accessed inside
  // the outlined fn.
  llvm::Value *cond = taskStructMgr.isAllocated();
Unused variable, we can remove this.
It is a dummy check added on purpose.
For taskloop translation, I need to emit a call to __kmpc_taskloop(), which takes the task pointer returned by __kmpc_omp_task_alloc(). The outlined function _QQmain..omp_par() performs all the computation associated with the task.
Inside _QQmain..omp_par(), %omp.task.context_ptr is checked before extracting the private variables, and this pointer is freed at the end of the function. This logic resulted in a double-free issue with taskloop. To resolve the issue, I moved the deallocation so that %omp.task.context_ptr is freed only after the end of the taskgroup.
However, after moving this logic outside the outlined function, _QQmain..omp_par() no longer receives the second parameter (the pointer to the task structure). So I added a dummy check on the allocation status of %omp.task.context_ptr, which ensures the second parameter is passed in all cases.
The reason for the double free is what I said here: #166903 (comment)
When the task gets duplicated, nothing is duplicating the task context structure. This will be harder work to fix so we can do it separately as discussed.
But I am surprised that we get down to zero references to the task context structure inside the body of the task. The context structure should not be generated if it really isn't needed. Looking more closely, I think you are missing the second call to generate the GEPs into the task context structure inside the body callback. Otherwise it will be the GEPs themselves and not the task context structure which is viewed as a live in-value by the outliner.
But Jack and I were discussing offline and we thought the quickest way to get this working for everything except nogroup is to just skip the task context structure etc entirely and treat this like the privatization in OMP parallel. This isn't safe for individual tasks, but the implicit end group will block until all tasks complete and so they cannot outlive the current stack frame.
We will need the task context structure working to support nogroup, but it could take some work to get the duplication callback working correctly. We are worried that it might not land in time for llvm 22 and so we want to get as much as we can enabled first.
Thank you for the response @tblah.
The reason for the double free is what I said here: #166903 (comment)
I got it. I can visualize it clearly now.
When the task gets duplicated, nothing is duplicating the task context structure. This will be harder work to fix so we can do it separately as discussed.
Yeah, sure. If it is okay with you, we can handle it later.
But Jack and I were discussing offline and we thought the quickest way to get this working for everything except nogroup is to just skip the task context structure etc entirely and treat this like the privatization in OMP parallel. This isn't safe for individual tasks, but the implicit end group will block until all tasks complete and so they cannot outlive the current stack frame.
My concern here is that when we later add support for nogroup, we would then need to reimplement all of this to accept the task context structure, am I right?
I have shared an alternative approach with you on Slack for handling this scenario (i.e. storing privates, shareds, loop bounds, etc. in the task_context structure) so that we can access it in the outlined function without any issues. Kindly go through it and let me know whether that approach is doable.
We discussed on slack. Kaviya's solution involves storing the loop bounds along with shared variables in the runtime's task structure. This fits well with how the runtime is designed and it is what clang does. LGTM. As I understand it this should allow the removal of the pattern matching for the loop bounds.
We discussed more the GEPs etc. I was mistaken, the patch does appear to be doing things right. It is strange that the outliner doesn't pick up on the GEP use (only the free). I think it is okay to keep this dummy check for now (it will be removed as dead code anyway) to avoid expanding the scope of this PR too much.
Okay, I understand your concerns, especially with this new design for the loop bounds. I guess we will have to get the duplication function working then (Jack and I can take that up).
Thank you @tblah for taking the time to go through all my questions and respond so thoroughly.
During discussion with Tom on Slack, I proposed another approach where we make use of the StructArg to store the loop bounds.
Here is an overview of the new design:
1. When we generate a task, the outlined function must see its inputs from StructArg. In the body callback (before outlining), we generate a GEP into the StructArg itself to record the fields (lb, ub, step) that the outlined function will later need:
   %structArg = alloca { i64,i64,i64,ptr }, align 8
   %task_lb = getelementptr { ptr, ptr, ptr, ptr }, ptr %1, i32 0, i32 0
   %task_ub = getelementptr { ptr, ptr, ptr, ptr }, ptr %1, i32 0, i32 1
   %task_st = getelementptr { ptr, ptr, ptr, ptr }, ptr %1, i32 0, i32 2
2. We also generate a GEP into the StructArg so we can store the task-context pointer into the struct:
   %gep_omp.task.context_ptr = getelementptr { ptr,ptr,ptr,ptr }, ptr %1, i32 0, i32 3
   %loadgep_omp.task.context_ptr = load ptr, ptr %gep_omp.task.context_ptr, align 8, !align !5
3. __kmpc_omp_task_alloc returns a pointer to a newly allocated kmp_task_t object.
4. Copy the StructArg into the allocated task of type kmp_task_t.
5. During the call to @__kmpc_taskloop(..), the runtime accesses the kmp_task_t object and updates the runtime-computed lb, ub and step values.
6. Later, in the outlined function, we can extract the runtime bounds with GEPs into the StructArg and the task_context struct, and adjust the loop-nest values.
With this approach, we can pack the loop bounds with StructArg and there is no need of %struct.kmp_task_info = type { ptr, ptr, i32, ptr, ptr, i64, i64, i64 }.
llvm::Value *cond = taskStructMgr.isAllocated();
The dummy check is no longer required under the new approach. Because the loop bounds are packed into StructArg, their usage is guaranteed within the outlined function, which ensures that the second argument can be passed correctly.
I am not sure that I can spend enough time to implement this new design, as it will require a significant effort. If either @tblah or @Stylie777 is available, please feel free to take it up. If you both have other priority work, I will discuss with @kiranktp and decide how to proceed.
Thankyou!
    return loopInfo;
  };

  llvm::OpenMPIRBuilder &ompBuilder = *moduleTranslation.getOpenMPBuilder();
Unused variable, we can remove this.
OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createTaskloop(
    const LocationDescription &Loc, InsertPointTy AllocaIP,
    BodyGenCallbackTy BodyGenCB,
    llvm::function_ref<llvm::Expected<llvm::CanonicalLoopInfo *>()> loopInfo,
The name loopInfo here does not seem to match that in the header file; this should really be LoopInfo. I also think we could just pass this as a pointer with an assert to check it is present; if you disagree, let me know.
Suggested change:
-    llvm::function_ref<llvm::Expected<llvm::CanonicalLoopInfo *>()> loopInfo,
+    llvm::CanonicalLoopInfo * loopInfo,
+    ...
+    assert(loopInfo && "Expected CLI info");
  Type *IVTy = IV->getType();
  Constant *One = ConstantInt::get(IVTy, 1);

  Value *task_lb = Builder.CreateStructGEP(OpenMPIRBuilder::Taskloop,
nit: Naming convention is not consistent with the rest of the file.
Suggested change:
-  Value *task_lb = Builder.CreateStructGEP(OpenMPIRBuilder::Taskloop,
+  Value *TaskLb = Builder.CreateStructGEP(OpenMPIRBuilder::Taskloop,
                                           OutlinedFn.getArg(1), 5, "gep_lb");
  Value *LowerBound = Builder.CreateLoad(IVTy, task_lb, "lb");

  Value *task_ub = Builder.CreateStructGEP(OpenMPIRBuilder::Taskloop,
nit: Naming convention is not consistent with rest of file.
Suggested change:
-  Value *task_ub = Builder.CreateStructGEP(OpenMPIRBuilder::Taskloop,
+  Value *TaskUb = Builder.CreateStructGEP(OpenMPIRBuilder::Taskloop,
Hi @tblah and @Stylie777, sorry for the delayed response.
Added translation support for taskloop construct.